In [ ]:
import rasterio
import matplotlib.pyplot as plt
import numpy as np

# Path to the RGB GeoTIFF to visualise
image_path = "C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan_graves/sheridan_graves.tif"

# Read the first three bands (assumed to be ordered R, G, B in the file).
with rasterio.open(image_path) as src:
    r, g, b = src.read([1, 2, 3])

# Band-last array for matplotlib, scaled to 0-1 for display.
# (The scaling step may be unnecessary if the image is already in range.)
rgb = np.stack((r, g, b), axis=-1)
rgb_normalized = rgb / rgb.max()

plt.imshow(rgb_normalized)
plt.axis('off')  # hide axis ticks and labels
plt.show()
In [ ]:
# Open the RGB image and read its first three bands (assumed R, G, B).
with rasterio.open(image_path) as src:
    r = src.read(1)  # Red band
    g = src.read(2)  # Green band
    b = src.read(3)  # Blue band
    no_data_value = src.nodata

# Replace no-data pixels with 0 (or any fill value of your choice).
# Guard against src.nodata being None: comparing an array against None
# matches nothing (and warns in newer numpy), so only mask when a nodata
# value is actually defined in the image metadata.
if no_data_value is not None:
    r[r == no_data_value] = 0
    g[g == no_data_value] = 0
    b[b == no_data_value] = 0

# Composite raster stack: a list holding the 3 band arrays.
crst = [r, g, b]

# Print the array shapes to confirm all bands match.
for band in crst:
    print(band.shape)
(1852, 2125) (1852, 2125) (1852, 2125)
In [ ]:
# Display the composite raster stack (list of the three band arrays)
crst
Out[ ]:
[array([[101, 192, 220, ..., 149, 150, 152],
[145, 215, 190, ..., 152, 151, 153],
[191, 177, 141, ..., 150, 150, 150],
...,
[ 78, 69, 80, ..., 61, 132, 135],
[102, 104, 136, ..., 99, 107, 128],
[105, 117, 110, ..., 131, 61, 109]], dtype=uint8),
array([[100, 191, 221, ..., 148, 147, 149],
[143, 213, 190, ..., 151, 148, 150],
[189, 174, 140, ..., 149, 148, 148],
...,
[ 97, 86, 96, ..., 71, 145, 148],
[119, 121, 152, ..., 108, 117, 138],
[123, 134, 124, ..., 138, 71, 117]], dtype=uint8),
array([[ 80, 173, 203, ..., 100, 102, 104],
[128, 201, 180, ..., 103, 103, 105],
[177, 167, 135, ..., 101, 100, 100],
...,
[ 65, 50, 57, ..., 21, 91, 94],
[ 83, 79, 107, ..., 61, 64, 85],
[ 83, 89, 75, ..., 94, 21, 66]], dtype=uint8)]
In [ ]:
import os
import numpy as np
import rasterio
from rasterio.warp import reproject, Resampling


def resample_to_raster(src_array, src_transform, src_crs, target_raster_path, resampling_method=Resampling.bilinear):
    """Reproject a band-first (bands, rows, cols) array onto another raster's grid.

    Parameters
    ----------
    src_array : np.ndarray
        Band-first stack to resample.
    src_transform, src_crs
        Geotransform and CRS describing ``src_array``.
    target_raster_path : str
        Raster whose shape, transform and CRS define the output grid.
    resampling_method
        rasterio resampling method (default: bilinear).

    Returns
    -------
    np.ndarray of shape (bands, target_rows, target_cols), same dtype as input.
    """
    with rasterio.open(target_raster_path) as target_raster:
        target_shape = target_raster.shape
        target_transform = target_raster.transform
        target_crs = target_raster.crs

    resampled_array = np.empty((src_array.shape[0], target_shape[0], target_shape[1]), dtype=src_array.dtype)
    for i in range(src_array.shape[0]):  # one reproject call per band
        reproject(
            source=src_array[i],
            destination=resampled_array[i],
            src_transform=src_transform,
            src_crs=src_crs,
            dst_transform=target_transform,
            dst_crs=target_crs,
            resampling=resampling_method,
        )
    return resampled_array


# Paths to the RGB image and the training/validation rasters
rgb_image_path = "C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan_graves/sheridan_graves.tif"
train_raster_path = "C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan_graves/shgr_train.tif"
val_raster_path = "C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan_graves/shgr_val.tif"

# Read the source CRS/transform and a band-first (3, rows, cols) RGB stack.
# src.read([1, 2, 3]) returns the stacked array directly — no need to wrap
# three separate reads in np.array().
with rasterio.open(rgb_image_path) as src:
    src_crs = src.crs
    src_transform = src.transform
    rgb_array = src.read([1, 2, 3])

# Resample the RGB stack onto the training and validation grids.
train_resampled = resample_to_raster(rgb_array, src_transform, src_crs, train_raster_path)
val_resampled = resample_to_raster(rgb_array, src_transform, src_crs, val_raster_path)

# Print the array shapes to confirm.
print(train_resampled.shape)
print(val_resampled.shape)
(3, 3799, 9178) (3, 1559, 9178)
In [ ]:
# NOTE(review): the recorded output below is entirely zeros, which may mean
# the RGB image and validation raster extents/CRS do not overlap — verify.
val_resampled
Out[ ]:
array([[[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
...,
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0]],
[[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
...,
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0]],
[[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
...,
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0, ..., 0, 0, 0]]], dtype=uint8)
In [ ]:
import os
import pandas as pd
import numpy as np
import rasterio


def rgb_rasters_to_dataframe(rgb_rasters, class_raster_path):
    """Flatten RGB band arrays plus a class-label raster into one DataFrame.

    Parameters
    ----------
    rgb_rasters : iterable of 2-D arrays
        Band arrays; each must have the same shape as the class raster.
    class_raster_path : str
        Raster whose band 1 holds the integer class labels.

    Returns
    -------
    pd.DataFrame with columns ``band_1 .. band_n`` and ``class``,
    one row per pixel.
    """
    with rasterio.open(class_raster_path) as class_raster:
        class_data = class_raster.read(1).flatten()  # labels assumed in band 1

    # One flattened column per band, then the labels.
    data = {f'band_{i+1}': band.flatten() for i, band in enumerate(rgb_rasters)}
    data['class'] = class_data
    return pd.DataFrame(data)


# Paths to the training and validation class-label rasters
train_class_raster_path = "C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan_graves/shgr_train.tif"
val_class_raster_path = "C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan_graves/shgr_val.tif"

# Build per-pixel dataframes for training and validation, then combine them.
train_df = rgb_rasters_to_dataframe(train_resampled, train_class_raster_path)
val_df = rgb_rasters_to_dataframe(val_resampled, val_class_raster_path)
combined_df = pd.concat([train_df, val_df], ignore_index=True)

# Drop rows containing NaN values. Filtering out the background (0) and
# 255 classes is deliberately left disabled below.
combined_df.dropna(inplace=True)
#combined_df = combined_df[combined_df['class'] != 255]
#combined_df = combined_df[combined_df['class'] != 0]
In [ ]:
# Distinct class labels present in the combined dataframe
combined_df["class"].unique()
Out[ ]:
array([0, 5, 7, 1, 4, 9, 8, 6, 3, 2], dtype=uint8)
In [ ]:
# NOTE(review): the first expression's result is discarded (only the last
# expression of a notebook cell is displayed) — it has no effect here.
combined_df[combined_df['class'] == 1]
combined_df['class'].value_counts()
Out[ ]:
class 0 37201481 1 3669376 9 2060369 7 1906062 5 1400922 8 1087760 2 886005 4 599260 3 360540 6 3949 Name: count, dtype: int64
In [ ]:
# Remove every row labelled with class value 7
combined_df = combined_df.loc[combined_df['class'] != 7]
combined_df
Out[ ]:
| band_1 | band_2 | band_3 | class | |
|---|---|---|---|---|
| 6709652 | 82 | 87 | 66 | 6 |
| 6709653 | 79 | 84 | 64 | 6 |
| 6709654 | 76 | 81 | 62 | 6 |
| 6709655 | 73 | 78 | 60 | 6 |
| 6709656 | 70 | 75 | 58 | 6 |
| ... | ... | ... | ... | ... |
| 139207511 | 100 | 101 | 26 | 5 |
| 139207512 | 95 | 95 | 21 | 5 |
| 139207513 | 89 | 90 | 15 | 5 |
| 139207514 | 89 | 89 | 15 | 5 |
| 139207515 | 96 | 95 | 21 | 5 |
46559633 rows × 4 columns
In [ ]:
# Undersample class 0 so it is no larger than class 1, leaving every
# other class untouched.
num_class_1 = combined_df[combined_df['class'] == 1].shape[0]

# Draw a reproducible random sample of class-0 rows of that size.
class_0_sampled = combined_df[combined_df['class'] == 0].sample(n=num_class_1, random_state=42)

# All rows that are not class 0.
non_class_0 = combined_df[combined_df['class'] != 0]

# Recombine and renumber the index of the balanced dataframe.
balanced_df = pd.concat([class_0_sampled, non_class_0])
balanced_df.reset_index(drop=True, inplace=True)
In [ ]:
# Continue working with the balanced dataframe; then inspect class counts.
# NOTE(review): the middle expression's result is discarded — no effect.
combined_df = balanced_df
combined_df[combined_df['class'] == 1]
combined_df['class'].value_counts()
Out[ ]:
class 0 3669376 1 3669376 9 2060369 7 1906062 5 1400922 8 1087760 2 886005 4 599260 3 360540 6 3949 Name: count, dtype: int64
In [ ]:
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt

# Histogram of the class distribution. Classes run 0-9 (see value_counts
# above), so the bins must span that full range: the previous bins
# (np.arange(0, 9) - 0.5) stopped at 7.5 and silently truncated classes
# 8 and 9 from the plot; the xticks likewise only covered 1-7.
plt.hist(combined_df['class'], bins=np.arange(0, 11) - 0.5, edgecolor='black')
plt.title('Class Distribution')
plt.xlabel('Class')
plt.ylabel('Frequency')
plt.xticks(np.arange(0, 10))  # one tick centred on each class bin
plt.show()

# Class labels are kept as-is (already integers starting at 0); the old
# no-op reassignment `combined_df['class'] = combined_df['class']` — a
# leftover of a disabled "- 1" shift — has been removed.

# Describe the DataFrame to check for Inf, NaNs, or other odd patterns.
description = combined_df.describe(include='all')  # include='all': statistics for every column
print(description)

# Print the dtype schema of the DataFrame.
print(combined_df.dtypes)
band_1 band_2 band_3 class count 1.564362e+07 1.564362e+07 1.564362e+07 1.564362e+07 mean 1.007855e+02 1.092795e+02 8.491793e+01 3.614009e+00 std 4.624586e+01 4.310098e+01 4.094368e+01 3.399555e+00 min 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 25% 7.100000e+01 8.000000e+01 6.000000e+01 1.000000e+00 50% 9.600000e+01 1.120000e+02 7.500000e+01 2.000000e+00 75% 1.240000e+02 1.330000e+02 1.000000e+02 7.000000e+00 max 2.550000e+02 2.550000e+02 2.550000e+02 9.000000e+00 band_1 uint8 band_2 uint8 band_3 uint8 class uint8 dtype: object
In [ ]:
from tqdm import tqdm
from sklearn.model_selection import cross_validate, StratifiedKFold
from sklearn.metrics import balanced_accuracy_score, cohen_kappa_score, make_scorer
from xgboost import XGBClassifier
import numpy as np
import pandas as pd

# XGBoost needs consecutive integer targets starting at 0, but a class is
# missing after the earlier filtering — so remap the surviving labels
# onto 0..k-1 in sorted order.
unique_classes = combined_df['class'].unique()
class_mapping = {old: new for new, old in enumerate(sorted(unique_classes))}
combined_df['class_mapped'] = combined_df['class'].map(class_mapping)

# Feature matrix (band values) and remapped target vector.
y = combined_df['class_mapped'].values
X = combined_df.drop(columns=['class', 'class_mapped']).values

# Classifier configuration; reduce n_estimators/max_depth if runtime is
# too long, learning_rate may need tuning.
xgb_classifier = XGBClassifier(
    n_estimators=100,
    max_depth=10,
    learning_rate=0.4,
    random_state=123,
    n_jobs=-1,  # use all available CPU cores
)

# 10-fold stratified CV with a fixed seed for reproducibility.
cv_strategy = StratifiedKFold(n_splits=10, shuffle=True, random_state=123)

balanced_accuracy_scores = []
kappa_scores = []

# Train and score the model fold by fold, with a progress bar.
for train_index, test_index in tqdm(cv_strategy.split(X, y), total=cv_strategy.get_n_splits(), desc="Cross-validating"):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]

    xgb_classifier.fit(X_train, y_train)
    y_pred = xgb_classifier.predict(X_test)

    balanced_accuracy_scores.append(balanced_accuracy_score(y_test, y_pred))
    kappa_scores.append(cohen_kappa_score(y_test, y_pred))

# Report the mean of both metrics across folds.
print(f"Balanced Accuracy: {np.mean(balanced_accuracy_scores)}")
print(f"Cohen's Kappa: {np.mean(kappa_scores)}")
Cross-validating: 100%|██████████| 10/10 [1:10:35<00:00, 423.54s/it]
Balanced Accuracy: 0.37340639120967506 Cohen's Kappa: 0.38464750836674844
In [ ]:
from tqdm.auto import tqdm
import xgboost as xgb


class XGBProgressCallback(xgb.callback.TrainingCallback):
    """Show a tqdm progress bar while xgb.train runs."""

    def __init__(self, rounds):
        self.pbar = tqdm(total=rounds)

    def after_iteration(self, model, epoch, evals_log):
        self.pbar.update(1)
        return False  # False => continue training

    def after_training(self, model):
        self.pbar.close()
        return model


# XGBoost's native interface requires the data as a DMatrix.
dtrain = xgb.DMatrix(X, label=y)

num_classes = np.unique(y).size
params = {
    'objective': 'multi:softmax',
    'eval_metric': 'mlogloss',   # multi-class log loss
    'num_class': num_classes,    # number of target classes
    # ... add any further parameters here
}

# Train for 100 boosting rounds with the progress callback attached.
bst = xgb.train(params, dtrain, num_boost_round=100, callbacks=[XGBProgressCallback(100)])
0%| | 0/100 [00:00<?, ?it/s]
In [ ]:
from sklearn.metrics import confusion_matrix
import seaborn as sns

# Predict the labels of the test set.
# NOTE(review): X_test/y_test here are whatever the *last* CV fold left
# behind — confirm this is the intended evaluation split.
# multi:softmax returns class indices as floats, so cast to int before
# building the confusion matrix.
y_pred = bst.predict(xgb.DMatrix(X_test)).astype(int)

# Compute and plot the confusion matrix.
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, fmt='d')
plt.title('Confusion Matrix')
plt.ylabel('Actual Label')
plt.xlabel('Predicted Label')
plt.show()
0 no data, 1 grass, 2 memorial, 3 funeral_hall, 4 playground, 5 footway, 6 bench, 7 grave, 8 park, 9 tree
In [ ]:
# Verify that every raster in crst shares the same shape before stacking.
raster_shapes = [raster.shape for raster in crst]
if all(shape == raster_shapes[0] for shape in raster_shapes):
    print("All rasters have the same shape:", raster_shapes[0])
else:
    print("Not all rasters have the same shape:", raster_shapes)

# Flatten each band into one column of a DataFrame (band_1, band_2, ...).
dfn = pd.DataFrame({f'band_{i+1}': raster.flatten() for i, raster in enumerate(crst)})
All rasters have the same shape: (1852, 2125)
In [ ]:
from xgboost import XGBClassifier
from sklearn.metrics import balanced_accuracy_score, cohen_kappa_score

# Replace any non-finite values (Inf/-Inf) with 0 before predicting.
dfn.replace([np.inf, -np.inf], 0, inplace=True)

# Fit on the full training data, then classify every pixel of the image.
xgb_classifier.fit(X, y)
pred = xgb_classifier.predict(dfn)

# NOTE(review): y holds the *remapped* class ids (class 7 was dropped, so
# labels above it are shifted down by one) — the 0-9 legend below does not
# apply 1:1 to `pred`; confirm the mapping before interpreting classes.
nodata_value = -9999  # any valid int32 value works as the nodata marker

# Write the prediction as a single-band int32 raster with the source
# image's georeferencing.
with rasterio.open(image_path) as src:
    profile = src.profile
    profile.update(dtype=rasterio.int32, count=1, nodata=nodata_value)
    pred_reshaped = pred.reshape(src.shape).astype(np.int32)
    with rasterio.open('prediction.tif', 'w', **profile) as dst:
        dst.write(pred_reshaped, 1)

plt.imshow(pred_reshaped, cmap='viridis')  # change colormap as needed
plt.colorbar()
plt.title('Prediction Raster')
plt.show()
0 no data 1 grass 2 memorial 3 funeral_hall 4 playground 5 footway 6 bench 7 grave 8 park 9 tree
In [ ]:
# Re-write the prediction raster using the original image's profile.
with rasterio.open(image_path) as src:  # 'image_path' is the original raster
    profile = src.profile
    profile.update(dtype=rasterio.int32, count=1, nodata=nodata_value)
    pred_reshaped = pred.reshape(src.shape).astype(np.int32)
    with rasterio.open('prediction.tif', 'w', **profile) as dst:
        dst.write(pred_reshaped, 1)

# Re-read the RGB original for a side-by-side comparison plot.
with rasterio.open("C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan_graves/sheridan_graves.tif") as src:
    r, g, b = src.read([1, 2, 3])
rgb = np.stack((r, g, b), axis=-1)
original_image = rgb / rgb.max()

plt.figure(figsize=(12, 6))

# Left: the original image.
plt.subplot(1, 2, 1)
plt.imshow(original_image)
plt.colorbar()
plt.title('Original Image')

# Right: the predicted classes.
plt.subplot(1, 2, 2)
plt.imshow(pred_reshaped, cmap='terrain')  # distinct colours per class
plt.colorbar()
plt.title('Prediction Raster')

plt.tight_layout()
plt.show()
In [ ]:
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import ListedColormap

# One colour per class id:
# 0 no data, 1 grass, 2 memorial, 3 funeral_hall, 4 playground,
# 5 footway, 6 bench, 7 grave, 8 park, 9 tree
# (The duplicate reassignment of class_colors that trailed this cell was
# a no-op and has been removed.)
class_colors = ["white", 'green', 'darkgrey', 'blue', 'indigo', 'purple', "pink", 'grey', 'lightgreen', 'darkgreen']
custom_cmap = ListedColormap(class_colors)

fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 12))

# Top: the original RGB image.
ax1.imshow(original_image)
ax1.set_title('Original Image')
ax1.set_xlabel('Pixel X Coordinate')
ax1.set_ylabel('Pixel Y Coordinate')

# Bottom: the prediction raster with the custom colormap.
prediction_plot = ax2.imshow(pred_reshaped, cmap=custom_cmap)
ax2.set_title('Prediction Raster')
ax2.set_xlabel('Pixel X Coordinate')
ax2.set_ylabel('Pixel Y Coordinate')

# Colorbar for the prediction raster, labelled with the class ids.
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.1)
colorbar = plt.colorbar(prediction_plot, cax=cax, ticks=range(len(class_colors)))
colorbar.set_label('Class Labels')
colorbar.set_ticklabels([str(i) for i in range(len(class_colors))])
cax.yaxis.set_label_position('left')  # colorbar labels on the left

plt.tight_layout()
plt.show()
In [ ]:
# Share of pixels predicted as the 'green' class.
# The variables were previously named "brown" and the comment claimed the
# index "should be 4" — class_colors.index('green') is in fact 1.
# NOTE(review): pred_reshaped holds *remapped* labels, so confirm that
# mapped class 1 is really the intended "green park" class.
green_class_index = class_colors.index('green')

# Count matching pixels and express them as a percentage of the image.
green_pixels_count = np.sum(pred_reshaped == green_class_index)
total_pixels = pred_reshaped.size
green_percentage = (green_pixels_count / total_pixels) * 100

print(f"Percentage of Green Park Class in the Image: {green_percentage:.2f}%")
Percentage of Green Park Class in the Image: 23.05%
In [ ]:
import rasterio
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
from IPython.display import display


def plot_zoomable_image(image, center_x, center_y, zoom, title='Image', cmap=None):
    """Plot a zoomed crop of a 2-D (single band) or 3-D (H, W, C) image.

    center_x / center_y give the crop centre in pixel coordinates; zoom > 1
    shrinks the visible window by that factor. The crop is clamped so it
    never runs outside the image bounds.
    """
    if image.ndim == 3:
        height, width, _ = image.shape
    elif image.ndim == 2:
        height, width = image.shape
    else:
        raise ValueError("Image must be either 2D or 3D.")

    zoomed_width = int(width / zoom)
    zoomed_height = int(height / zoom)

    # Window boundaries, clamped to the image.
    x_min = max(center_x - zoomed_width // 2, 0)
    x_max = min(x_min + zoomed_width, width)
    y_min = max(center_y - zoomed_height // 2, 0)
    y_max = min(y_min + zoomed_height, height)
    # Shift the window back inside if it ran past the far edge.
    if x_max == width:
        x_min = x_max - zoomed_width
    if y_max == height:
        y_min = y_max - zoomed_height

    plt.figure(figsize=(18, 9))
    # BUG FIX: the 2-D branch previously hard-coded the global `custom_cmap`
    # and ignored the `cmap` parameter; both branches now honour `cmap`.
    if image.ndim == 3:
        plt.imshow(image[y_min:y_max, x_min:x_max], cmap=cmap)
    else:
        plt.imshow(image[y_min:y_max, x_min:x_max], cmap=cmap, aspect='auto')
    plt.title(title)
    plt.colorbar()
    plt.show()


# Load the original image for comparison.
with rasterio.open("C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan_graves/sheridan_graves.tif") as src:
    r, g, b = src.read([1, 2, 3])
original_image = np.stack((r, g, b), axis=-1) / 255.0  # assumes 8-bit bands — TODO confirm

# 'pred_reshaped' is assumed to already hold the prediction raster.

# Sliders controlling the crop centre and zoom factor.
center_x = widgets.IntSlider(min=0, max=original_image.shape[1], step=100, value=original_image.shape[1] // 2, description='Center X:')
center_y = widgets.IntSlider(min=0, max=original_image.shape[0], step=100, value=original_image.shape[0] // 2, description='Center Y:')
zoom = widgets.FloatSlider(min=1, max=10, value=1, step=0.1, description='Zoom:')


def update_plots(center_x, center_y, zoom):
    # Redraw both views at the current slider positions.
    plot_zoomable_image(original_image, center_x, center_y, zoom, title='Original Image')
    plot_zoomable_image(pred_reshaped, center_x, center_y, zoom, title='Prediction Raster', cmap=custom_cmap)


# Wire the sliders to the update function and display the widget.
interactive_plot = widgets.interactive(update_plots, center_x=center_x, center_y=center_y, zoom=zoom)
display(interactive_plot)
interactive(children=(IntSlider(value=1062, description='Center X:', max=2125, step=100), IntSlider(value=926,…
New images to predict¶
In [ ]:
import rasterio
import numpy as np
import matplotlib.pyplot as plt
from xgboost import XGBClassifier

# Classify a new scene with the already-trained xgb_classifier.
with rasterio.open('C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan/sheridan2.tif') as src:
    new_image = src.read()  # all bands
new_image = np.moveaxis(new_image, 0, -1)  # (bands, H, W) -> (H, W, bands)

# One row per pixel, one column per band; sanitise any NaN/Inf values.
new_image_flattened = new_image.reshape(-1, new_image.shape[2])
new_image_flattened[~np.isfinite(new_image_flattened)] = 0

# Predict per-pixel classes and restore the spatial layout.
predictions = xgb_classifier.predict(new_image_flattened)
predictions_reshaped = predictions.reshape(new_image.shape[0], new_image.shape[1])

plt.imshow(predictions_reshaped, cmap='viridis')
plt.colorbar()
plt.title('Predictions on New Image')
plt.show()
In [ ]:
# Colours per class id (0 no data, 1 grass, 2 memorial, 3 funeral_hall,
# 4 playground, 5 footway, 6 bench, 7 grave, 8 park, 9 tree).
# The duplicated "class_colors = class_colors = ..." assignment was a typo.
class_colors = ["white", 'green', 'darkgrey', 'blue', 'indigo', 'purple', "pink", 'grey', 'lightgreen', 'darkgreen']
custom_cmap = ListedColormap(class_colors)

# Load the original RGB image to display alongside the predictions.
with rasterio.open('C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan/sheridan2.tif') as src:
    original_image = src.read([1, 2, 3])  # first three bands assumed RGB
original_image = np.moveaxis(original_image, 0, -1) / 255.0  # band-last, 0-1 range

fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 12))

# Top: original image.
ax1.imshow(original_image)
ax1.set_title('Original Image')
ax1.axis('off')

# Bottom: prediction raster with the custom colormap.
prediction_plot = ax2.imshow(predictions_reshaped, cmap=custom_cmap)
ax2.set_title('Prediction Raster')
ax2.axis('off')

# Colorbar for the prediction raster, labelled with the class ids.
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.1)
colorbar = plt.colorbar(prediction_plot, cax=cax, ticks=range(len(class_colors)))
colorbar.set_label('Class Labels')
colorbar.set_ticklabels([str(i) for i in range(len(class_colors))])
cax.yaxis.set_label_position('left')

plt.tight_layout()
plt.show()
In [ ]:
# Classify the sheridan3 scene and plot it next to the RGB original.
with rasterio.open('C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan/sheridan3.tif') as src:
    new_image = src.read()  # all bands
new_image = np.moveaxis(new_image, 0, -1)  # (bands, H, W) -> (H, W, bands)

# One row per pixel; sanitise NaN/Inf before predicting.
new_image_flattened = new_image.reshape(-1, new_image.shape[2])
new_image_flattened[~np.isfinite(new_image_flattened)] = 0
predictions = xgb_classifier.predict(new_image_flattened)
predictions_reshaped = predictions.reshape(new_image.shape[0], new_image.shape[1])

# Re-read the first three bands for display.
with rasterio.open('C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/sheridan/sheridan3.tif') as src:
    original_image = src.read([1, 2, 3])
original_image = np.moveaxis(original_image, 0, -1) / 255.0

fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 12))

ax1.imshow(original_image)
ax1.set_title('Original Image')
ax1.axis('off')

prediction_plot = ax2.imshow(predictions_reshaped, cmap=custom_cmap)
ax2.set_title('Prediction Raster')
ax2.axis('off')

divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.1)
colorbar = plt.colorbar(prediction_plot, cax=cax, ticks=range(len(class_colors)))
colorbar.set_label('Class Labels')
colorbar.set_ticklabels([str(i) for i in range(len(class_colors))])
cax.yaxis.set_label_position('left')

plt.tight_layout()
plt.show()
In [ ]:
# Classify the Wittelsbacher Park scene and plot it next to the RGB original.
with rasterio.open('C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/wittel/wittelsbacherpark.tif') as src:
    new_image = src.read()  # all bands
new_image = np.moveaxis(new_image, 0, -1)  # (bands, H, W) -> (H, W, bands)

# One row per pixel; sanitise NaN/Inf before predicting.
new_image_flattened = new_image.reshape(-1, new_image.shape[2])
new_image_flattened[~np.isfinite(new_image_flattened)] = 0
predictions = xgb_classifier.predict(new_image_flattened)
predictions_reshaped = predictions.reshape(new_image.shape[0], new_image.shape[1])

# Re-read the first three bands for display.
with rasterio.open('C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/wittel/wittelsbacherpark.tif') as src:
    original_image = src.read([1, 2, 3])
original_image = np.moveaxis(original_image, 0, -1) / 255.0

fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 12))

ax1.imshow(original_image)
ax1.set_title('Original Image')
ax1.axis('off')

prediction_plot = ax2.imshow(predictions_reshaped, cmap=custom_cmap)
ax2.set_title('Prediction Raster')
ax2.axis('off')

divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.1)
colorbar = plt.colorbar(prediction_plot, cax=cax, ticks=range(len(class_colors)))
colorbar.set_label('Class Labels')
colorbar.set_ticklabels([str(i) for i in range(len(class_colors))])
cax.yaxis.set_label_position('left')

plt.tight_layout()
plt.show()
In [ ]:
# Classify the Rotes Tor scene and plot it next to the RGB original.
with rasterio.open('C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/rotestor/rotestor.tif') as src:
    new_image = src.read()  # all bands
new_image = np.moveaxis(new_image, 0, -1)  # (bands, H, W) -> (H, W, bands)

# One row per pixel; sanitise NaN/Inf before predicting.
new_image_flattened = new_image.reshape(-1, new_image.shape[2])
new_image_flattened[~np.isfinite(new_image_flattened)] = 0
predictions = xgb_classifier.predict(new_image_flattened)
predictions_reshaped = predictions.reshape(new_image.shape[0], new_image.shape[1])

# Re-read the first three bands for display.
with rasterio.open('C:/Users/leoni/Documents/Uni/UGS/Project/Classification_data/Imagery/rotestor/rotestor.tif') as src:
    original_image = src.read([1, 2, 3])
original_image = np.moveaxis(original_image, 0, -1) / 255.0

fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 12))

ax1.imshow(original_image)
ax1.set_title('Original Image')
ax1.axis('off')

prediction_plot = ax2.imshow(predictions_reshaped, cmap=custom_cmap)
ax2.set_title('Prediction Raster')
ax2.axis('off')

divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.1)
colorbar = plt.colorbar(prediction_plot, cax=cax, ticks=range(len(class_colors)))
colorbar.set_label('Class Labels')
colorbar.set_ticklabels([str(i) for i in range(len(class_colors))])
cax.yaxis.set_label_position('left')

plt.tight_layout()
plt.show()
In [ ]: